vmx_initialize_guest_resources().
Signed-off-by: Xin B Li <xin.b.li@intel.com>
Signed-off-by: Keir Fraser <keir@xensource.com>
#include <asm/shadow_64.h>
#endif
-int vmcs_size;
+static int vmcs_size;
+static int vmcs_order;
+static u32 vmcs_revision_id;
-struct vmcs_struct *vmx_alloc_vmcs(void)
+void vmx_init_vmcs_config(void)
{
- struct vmcs_struct *vmcs;
u32 vmx_msr_low, vmx_msr_high;
+ if ( vmcs_size )
+ return;
+
rdmsr(MSR_IA32_VMX_BASIC_MSR, vmx_msr_low, vmx_msr_high);
- vmcs_size = vmx_msr_high & 0x1fff;
- vmcs = alloc_xenheap_pages(get_order_from_bytes(vmcs_size));
- memset((char *)vmcs, 0, vmcs_size); /* don't remove this */
- vmcs->vmcs_revision_id = vmx_msr_low;
- return vmcs;
+ vmcs_revision_id = vmx_msr_low;
+
+ vmcs_size = vmx_msr_high & 0x1fff;
+ vmcs_order = get_order_from_bytes(vmcs_size);
}
-static void free_vmcs(struct vmcs_struct *vmcs)
+static struct vmcs_struct *vmx_alloc_vmcs(void)
{
- int order;
+ struct vmcs_struct *vmcs;
- order = get_order_from_bytes(vmcs_size);
- free_xenheap_pages(vmcs, order);
+ if ( (vmcs = alloc_xenheap_pages(vmcs_order)) == NULL )
+ {
+ DPRINTK("Failed to allocate VMCS.\n");
+ return NULL;
+ }
+
+ memset(vmcs, 0, vmcs_size); /* don't remove this */
+ vmcs->vmcs_revision_id = vmcs_revision_id;
+
+ return vmcs;
+}
+
+static void vmx_free_vmcs(struct vmcs_struct *vmcs)
+{
+ free_xenheap_pages(vmcs, vmcs_order);
}
static void __vmx_clear_vmcs(void *info)
{
struct vcpu *v = info;
+
__vmpclear(virt_to_maddr(v->arch.hvm_vmx.vmcs));
+
v->arch.hvm_vmx.active_cpu = -1;
v->arch.hvm_vmx.launched = 0;
}
vcpu_unpause(v);
}
+struct vmcs_struct *vmx_alloc_host_vmcs(void)
+{
+ return vmx_alloc_vmcs();
+}
+
+void vmx_free_host_vmcs(struct vmcs_struct *vmcs)
+{
+ vmx_free_vmcs(vmcs);
+}
+
static inline int construct_vmcs_controls(struct arch_vmx_struct *arch_vmx)
{
int error = 0;
- void *io_bitmap_a;
- void *io_bitmap_b;
error |= __vmwrite(PIN_BASED_VM_EXEC_CONTROL,
MONITOR_PIN_BASED_EXEC_CONTROLS);
error |= __vmwrite(VM_ENTRY_CONTROLS, MONITOR_VM_ENTRY_CONTROLS);
- /* need to use 0x1000 instead of PAGE_SIZE */
- io_bitmap_a = (void*) alloc_xenheap_pages(get_order_from_bytes(0x1000));
- io_bitmap_b = (void*) alloc_xenheap_pages(get_order_from_bytes(0x1000));
- memset(io_bitmap_a, 0xff, 0x1000);
- /* don't bother debug port access */
- clear_bit(PC_DEBUG_PORT, io_bitmap_a);
- memset(io_bitmap_b, 0xff, 0x1000);
-
- error |= __vmwrite(IO_BITMAP_A, (u64) virt_to_maddr(io_bitmap_a));
- error |= __vmwrite(IO_BITMAP_B, (u64) virt_to_maddr(io_bitmap_b));
-
- arch_vmx->io_bitmap_a = io_bitmap_a;
- arch_vmx->io_bitmap_b = io_bitmap_b;
+ error |= __vmwrite(IO_BITMAP_A, (u64)virt_to_maddr(arch_vmx->io_bitmap_a));
+ error |= __vmwrite(IO_BITMAP_B, (u64)virt_to_maddr(arch_vmx->io_bitmap_b));
return error;
}
}
/*
- * Need to extend to support full virtualization.
+ * the working VMCS pointer has been set properly
+ * just before entering this function.
*/
static int construct_vmcs(struct vcpu *v,
cpu_user_regs_t *regs)
{
struct arch_vmx_struct *arch_vmx = &v->arch.hvm_vmx;
int error;
- long rc;
-
- memset(arch_vmx, 0, sizeof(struct arch_vmx_struct));
- spin_lock_init(&arch_vmx->vmcs_lock);
-
- /*
- * Create a new VMCS
- */
- if (!(arch_vmx->vmcs = vmx_alloc_vmcs())) {
- printk("Failed to create a new VMCS\n");
- return -ENOMEM;
- }
-
- __vmx_clear_vmcs(v);
- vmx_load_vmcs(v);
-
- if ((error = construct_vmcs_controls(arch_vmx))) {
- printk("construct_vmcs: construct_vmcs_controls failed\n");
- rc = -EINVAL;
- goto err_out;
+ if ( (error = construct_vmcs_controls(arch_vmx)) ) {
+ printk("construct_vmcs: construct_vmcs_controls failed.\n");
+ return error;
}
/* host selectors */
- if ((error = construct_vmcs_host())) {
- printk("construct_vmcs: construct_vmcs_host failed\n");
- rc = -EINVAL;
- goto err_out;
+ if ( (error = construct_vmcs_host()) ) {
+ printk("construct_vmcs: construct_vmcs_host failed.\n");
+ return error;
}
/* guest selectors */
- if ((error = construct_init_vmcs_guest(regs))) {
- printk("construct_vmcs: construct_vmcs_guest failed\n");
- rc = -EINVAL;
- goto err_out;
+ if ( (error = construct_init_vmcs_guest(regs)) ) {
+ printk("construct_vmcs: construct_vmcs_guest failed.\n");
+ return error;
}
- if ((error |= __vmwrite(EXCEPTION_BITMAP,
- MONITOR_DEFAULT_EXCEPTION_BITMAP))) {
- printk("construct_vmcs: setting Exception bitmap failed\n");
- rc = -EINVAL;
- goto err_out;
+ if ( (error = __vmwrite(EXCEPTION_BITMAP,
+ MONITOR_DEFAULT_EXCEPTION_BITMAP)) ) {
+ printk("construct_vmcs: setting exception bitmap failed.\n");
+ return error;
}
- if (regs->eflags & EF_TF)
- __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB);
+ if ( regs->eflags & EF_TF )
+ error = __vm_set_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB);
else
- __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB);
+ error = __vm_clear_bit(EXCEPTION_BITMAP, EXCEPTION_BITMAP_DB);
- return 0;
+ return error;
+}
-err_out:
- vmx_destroy_vmcs(v);
- return rc;
+int vmx_create_vmcs(struct vcpu *v)
+{
+ if ( (v->arch.hvm_vmx.vmcs = vmx_alloc_vmcs()) == NULL )
+ return -ENOMEM;
+ __vmx_clear_vmcs(v);
+ return 0;
}
void vmx_destroy_vmcs(struct vcpu *v)
vmx_clear_vmcs(v);
- free_vmcs(arch_vmx->vmcs);
- arch_vmx->vmcs = NULL;
+ free_xenheap_pages(arch_vmx->io_bitmap_a, IO_BITMAP_ORDER);
+ free_xenheap_pages(arch_vmx->io_bitmap_b, IO_BITMAP_ORDER);
- free_xenheap_pages(arch_vmx->io_bitmap_a, get_order_from_bytes(0x1000));
arch_vmx->io_bitmap_a = NULL;
-
- free_xenheap_pages(arch_vmx->io_bitmap_b, get_order_from_bytes(0x1000));
arch_vmx->io_bitmap_b = NULL;
+
+ vmx_free_vmcs(arch_vmx->vmcs);
+ arch_vmx->vmcs = NULL;
}
void vm_launch_fail(unsigned long eflags)
void arch_vmx_do_launch(struct vcpu *v)
{
- int error;
cpu_user_regs_t *regs = &current->arch.guest_context.user_regs;
- error = construct_vmcs(v, regs);
- if ( error < 0 )
+ vmx_load_vmcs(v);
+
+ if ( construct_vmcs(v, regs) < 0 )
{
- if (v->vcpu_id == 0) {
- printk("Failed to construct a new VMCS for BSP.\n");
+ if ( v->vcpu_id == 0 ) {
+ printk("Failed to construct VMCS for BSP.\n");
} else {
- printk("Failed to construct a new VMCS for AP %d\n", v->vcpu_id);
+ printk("Failed to construct VMCS for AP %d.\n", v->vcpu_id);
}
domain_crash_synchronous();
}
+
vmx_do_launch(v);
reset_stack_and_jump(vmx_asm_do_vmentry);
}
static void vmx_ctxt_switch_from(struct vcpu *v);
static void vmx_ctxt_switch_to(struct vcpu *v);
-void vmx_final_setup_guest(struct vcpu *v)
+static int vmx_initialize_guest_resources(struct vcpu *v)
{
+ struct domain *d = v->domain;
+ struct vcpu *vc;
+ void *io_bitmap_a, *io_bitmap_b;
+ int rc;
+
v->arch.schedule_tail = arch_vmx_do_launch;
v->arch.ctxt_switch_from = vmx_ctxt_switch_from;
v->arch.ctxt_switch_to = vmx_ctxt_switch_to;
- if ( v->vcpu_id == 0 )
- {
- struct domain *d = v->domain;
- struct vcpu *vc;
+ if ( v->vcpu_id != 0 )
+ return 1;
+ for_each_vcpu ( d, vc )
+ {
/* Initialize monitor page table */
- for_each_vcpu(d, vc)
- vc->arch.monitor_table = pagetable_null();
+ vc->arch.monitor_table = pagetable_null();
- /*
- * Required to do this once per domain
- * XXX todo: add a seperate function to do these.
- */
- memset(&d->shared_info->evtchn_mask[0], 0xff,
- sizeof(d->shared_info->evtchn_mask));
-
- /* Put the domain in shadow mode even though we're going to be using
- * the shared 1:1 page table initially. It shouldn't hurt */
- shadow_mode_enable(d,
- SHM_enable|SHM_refcounts|
- SHM_translate|SHM_external|SHM_wr_pt_pte);
+ memset(&vc->arch.hvm_vmx, 0, sizeof(struct arch_vmx_struct));
+
+ if ( (rc = vmx_create_vmcs(vc)) != 0 )
+ {
+ DPRINTK("Failed to create VMCS for vcpu %d: err=%d.\n",
+ vc->vcpu_id, rc);
+ return 0;
+ }
+
+ spin_lock_init(&vc->arch.hvm_vmx.vmcs_lock);
+
+ if ( (io_bitmap_a = alloc_xenheap_pages(IO_BITMAP_ORDER)) == NULL )
+ {
+ DPRINTK("Failed to allocate io bitmap a for vcpu %d.\n",
+ vc->vcpu_id);
+ return 0;
+ }
+
+ if ( (io_bitmap_b = alloc_xenheap_pages(IO_BITMAP_ORDER)) == NULL )
+ {
+ DPRINTK("Failed to allocate io bitmap b for vcpu %d.\n",
+ vc->vcpu_id);
+ return 0;
+ }
+
+ memset(io_bitmap_a, 0xff, 0x1000);
+ memset(io_bitmap_b, 0xff, 0x1000);
+
+ /* don't bother debug port access */
+ clear_bit(PC_DEBUG_PORT, io_bitmap_a);
+
+ vc->arch.hvm_vmx.io_bitmap_a = io_bitmap_a;
+ vc->arch.hvm_vmx.io_bitmap_b = io_bitmap_b;
}
+
+ /*
+ * Required to do this once per domain XXX todo: add a separate function
+ * to do these.
+ */
+ memset(&d->shared_info->evtchn_mask[0], 0xff,
+ sizeof(d->shared_info->evtchn_mask));
+
+ /* Put the domain in shadow mode even though we're going to be using
+ * the shared 1:1 page table initially. It shouldn't hurt */
+ shadow_mode_enable(
+ d, SHM_enable|SHM_refcounts|SHM_translate|SHM_external|SHM_wr_pt_pte);
+
+ return 1;
}
static void vmx_relinquish_guest_resources(struct domain *d)
for_each_vcpu ( d, v )
{
+ vmx_destroy_vmcs(v);
if ( !test_bit(_VCPUF_initialised, &v->vcpu_flags) )
continue;
- vmx_destroy_vmcs(v);
free_monitor_pagetable(v);
kill_timer(&v->arch.hvm_vmx.hlt_timer);
if ( hvm_apic_support(v->domain) && (VLAPIC(v) != NULL) )
__vmxoff();
}
-int vmx_initialize_guest_resources(struct vcpu *v)
-{
- vmx_final_setup_guest(v);
- return 1;
-}
-
void vmx_migrate_timers(struct vcpu *v)
{
struct periodic_time *pt = &(v->domain->arch.hvm_domain.pl_time.periodic_tm);
int start_vmx(void)
{
- struct vmcs_struct *vmcs;
- u32 ecx;
u32 eax, edx;
- u64 phys_vmcs; /* debugging */
+ struct vmcs_struct *vmcs;
/*
* Xen does not fill x86_capability words except 0.
*/
- ecx = cpuid_ecx(1);
- boot_cpu_data.x86_capability[4] = ecx;
+ boot_cpu_data.x86_capability[4] = cpuid_ecx(1);
if (!(test_bit(X86_FEATURE_VMXE, &boot_cpu_data.x86_capability)))
return 0;
rdmsr(IA32_FEATURE_CONTROL_MSR, eax, edx);
- if (eax & IA32_FEATURE_CONTROL_MSR_LOCK) {
- if ((eax & IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON) == 0x0) {
+ if ( eax & IA32_FEATURE_CONTROL_MSR_LOCK )
+ {
+ if ( (eax & IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON) == 0x0 )
+ {
printk("VMX disabled by Feature Control MSR.\n");
return 0;
}
}
- else {
+ else
+ {
wrmsr(IA32_FEATURE_CONTROL_MSR,
IA32_FEATURE_CONTROL_MSR_LOCK |
IA32_FEATURE_CONTROL_MSR_ENABLE_VMXON, 0);
}
- if (!check_vmx_controls(MONITOR_PIN_BASED_EXEC_CONTROLS,
- MSR_IA32_VMX_PINBASED_CTLS_MSR))
+ if ( !check_vmx_controls(MONITOR_PIN_BASED_EXEC_CONTROLS,
+ MSR_IA32_VMX_PINBASED_CTLS_MSR) )
return 0;
- if (!check_vmx_controls(MONITOR_CPU_BASED_EXEC_CONTROLS,
- MSR_IA32_VMX_PROCBASED_CTLS_MSR))
+ if ( !check_vmx_controls(MONITOR_CPU_BASED_EXEC_CONTROLS,
+ MSR_IA32_VMX_PROCBASED_CTLS_MSR) )
return 0;
- if (!check_vmx_controls(MONITOR_VM_EXIT_CONTROLS,
- MSR_IA32_VMX_EXIT_CTLS_MSR))
+ if ( !check_vmx_controls(MONITOR_VM_EXIT_CONTROLS,
+ MSR_IA32_VMX_EXIT_CTLS_MSR) )
return 0;
- if (!check_vmx_controls(MONITOR_VM_ENTRY_CONTROLS,
- MSR_IA32_VMX_ENTRY_CTLS_MSR))
+ if ( !check_vmx_controls(MONITOR_VM_ENTRY_CONTROLS,
+ MSR_IA32_VMX_ENTRY_CTLS_MSR) )
return 0;
- set_in_cr4(X86_CR4_VMXE); /* Enable VMXE */
+ set_in_cr4(X86_CR4_VMXE);
+
+ vmx_init_vmcs_config();
- if (!(vmcs = vmx_alloc_vmcs())) {
- printk("Failed to allocate VMCS\n");
+ if ( (vmcs = vmx_alloc_host_vmcs()) == NULL )
+ {
+ printk("Failed to allocate host VMCS\n");
return 0;
}
- phys_vmcs = (u64) virt_to_maddr(vmcs);
-
- if (__vmxon(phys_vmcs)) {
+ if ( __vmxon(virt_to_maddr(vmcs)) )
+ {
printk("VMXON failed\n");
+ vmx_free_host_vmcs(vmcs);
return 0;
}
extern int start_vmx(void);
extern void stop_vmx(void);
extern void vmcs_dump_vcpu(void);
-void vmx_final_setup_guest(struct vcpu *v);
-
-void vmx_enter_scheduler(void);
+extern void vmx_init_vmcs_config(void);
enum {
VMX_CPU_STATE_PAE_ENABLED=0,
unsigned char data [0]; /* vmcs size is read from MSR */
};
-extern int vmcs_size;
-
enum {
VMX_INDEX_MSR_LSTAR = 0,
VMX_INDEX_MSR_STAR,
unsigned long shadow_gs;
};
+/* io bitmap is 4KBytes in size */
+#define IO_BITMAP_SIZE 0x1000
+#define IO_BITMAP_ORDER (get_order_from_bytes(IO_BITMAP_SIZE))
+
struct arch_vmx_struct {
/* Virtual address of VMCS. */
struct vmcs_struct *vmcs;
void vmx_do_resume(struct vcpu *);
-struct vmcs_struct *vmx_alloc_vmcs(void);
+struct vmcs_struct *vmx_alloc_host_vmcs(void);
+void vmx_free_host_vmcs(struct vmcs_struct *vmcs);
+
+int vmx_create_vmcs(struct vcpu *v);
void vmx_destroy_vmcs(struct vcpu *v);
void vmx_vmcs_enter(struct vcpu *v);
void vmx_vmcs_exit(struct vcpu *v);